host_state->msr_items[VMX_INDEX_MSR_EFER]);
set_bit(VMX_INDEX_MSR_EFER, &host_state->flags);
set_bit(VMX_INDEX_MSR_EFER, &msr->flags);
- wrmsrl(MSR_EFER, msr_content);
}
}
break;
return inst_len;
}
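+/* Return the current value of the guest's CR0, CR2 or CR3 for the given vcpu. */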
+unsigned long vmx_get_ctrl_reg(struct vcpu *v, unsigned int num)
+{
+ switch ( num )
+ {
+ case 0:
+ return v->arch.hvm_vmx.cpu_cr0;
+ case 2:
+ return v->arch.hvm_vmx.cpu_cr2;
+ case 3:
+ return v->arch.hvm_vmx.cpu_cr3;
+ default:
+ BUG();
+ }
+ return 0; /* dummy */
+}
+
extern long evtchn_send(int lport);
void do_nmi(struct cpu_user_regs *);
hvm_funcs.realmode = vmx_realmode;
hvm_funcs.paging_enabled = vmx_paging_enabled;
hvm_funcs.instruction_length = vmx_instruction_length;
+ hvm_funcs.get_guest_ctrl_reg = vmx_get_ctrl_reg;
hvm_enabled = 1;
!vlapic_global_enabled((VLAPIC(v))) )
clear_bit(X86_FEATURE_APIC, &edx);
-#if CONFIG_PAGING_LEVELS >= 3
+#if CONFIG_PAGING_LEVELS < 3
+ clear_bit(X86_FEATURE_PSE, &edx);
+ clear_bit(X86_FEATURE_PAE, &edx);
+ clear_bit(X86_FEATURE_PSE36, &edx);
+#else
if ( v->domain->arch.ops->guest_paging_levels == PAGING_L2 )
-#endif
{
clear_bit(X86_FEATURE_PSE, &edx);
- clear_bit(X86_FEATURE_PAE, &edx);
clear_bit(X86_FEATURE_PSE36, &edx);
}
+#endif
/* Unsupportable for virtualised CPUs. */
ecx &= ~VMX_VCPU_CPUID_L1_RESERVED; /* mask off reserved bits */
v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
if (old_base_mfn)
put_page(mfn_to_page(old_base_mfn));
- update_pagetables(v);
/*
* arch.shadow_table should now hold the next CR3 for shadow
*/
v->arch.hvm_vmx.cpu_cr3 = c->cr3;
+ update_pagetables(v);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
__vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table));
}
v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
if (old_base_mfn)
put_page(mfn_to_page(old_base_mfn));
- update_pagetables(v);
/*
* arch.shadow_table should now hold the next CR3 for shadow
*/
+#if CONFIG_PAGING_LEVELS >= 3
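+ /* For PAE guests, flush out-of-sync shadow state before switching to the new PDPT. */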
+ if ( v->domain->arch.ops->guest_paging_levels == PAGING_L3 )
+ shadow_sync_all(v->domain);
+#endif
+
v->arch.hvm_vmx.cpu_cr3 = value;
+ update_pagetables(v);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx",
value);
__vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table));
}
case 4: /* CR4 */
{
- if (value & X86_CR4_PAE){
+ unsigned long old_cr4;
+
+ __vmread(CR4_READ_SHADOW, &old_cr4);
+
+ if ( (value & X86_CR4_PAE) && !(old_cr4 & X86_CR4_PAE) )
+ {
set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
- } else {
- if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
- &v->arch.hvm_vmx.cpu_state)){
- vmx_inject_exception(v, TRAP_gp_fault, 0);
+
+ if ( vmx_pgbit_test(v) )
+ {
+ /* The guest is a 32-bit PAE guest. */
+#if CONFIG_PAGING_LEVELS >= 4
+ unsigned long mfn, old_base_mfn;
+
+ if ( !shadow_set_guest_paging_levels(v->domain, 3) )
+ {
+ printk("Unsupported guest paging levels\n");
+ domain_crash_synchronous(); /* need to take a clean path */
+ }
+
+ if ( !VALID_MFN(mfn = get_mfn_from_gpfn(
+ v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)) ||
+ !get_page(mfn_to_page(mfn), v->domain) )
+ {
+ printk("Invalid CR3 value = %lx", v->arch.hvm_vmx.cpu_cr3);
+ domain_crash_synchronous(); /* need to take a clean path */
+ }
+
+ old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
+ if ( old_base_mfn )
+ put_page(mfn_to_page(old_base_mfn));
+
+ /*
+ * Now arch.guest_table points to machine physical.
+ */
+
+ v->arch.guest_table = mk_pagetable((u64)mfn << PAGE_SHIFT);
+ update_pagetables(v);
+
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx",
+ (unsigned long) (mfn << PAGE_SHIFT));
+
+ __vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.shadow_table));
+
+ /*
+ * arch.shadow_table should now hold the next CR3 for shadow
+ */
+
+ HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
+ v->arch.hvm_vmx.cpu_cr3, mfn);
+#endif
+ }
+ else
+ {
+ /* The guest is 64-bit. */
+#if CONFIG_PAGING_LEVELS >= 4
+ if ( !shadow_set_guest_paging_levels(v->domain, 4) )
+ {
+ printk("Unsupported guest paging levels\n");
+ domain_crash_synchronous(); /* need to take a clean path */
+ }
+#endif
}
+ }
+ else if ( value & X86_CR4_PAE )
+ set_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
+ else
+ {
+ if ( test_bit(VMX_CPU_STATE_LMA_ENABLED, &v->arch.hvm_vmx.cpu_state) )
+ vmx_inject_exception(v, TRAP_gp_fault, 0);
+
clear_bit(VMX_CPU_STATE_PAE_ENABLED, &v->arch.hvm_vmx.cpu_state);
}
__vmread(CR4_READ_SHADOW, &old_cr);
-
__vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
__vmwrite(CR4_READ_SHADOW, value);
* Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
* all TLB entries except global entries.
*/
- if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE)) {
+ if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
shadow_sync_all(v->domain);
- }
+
break;
}
default:
#if CONFIG_PAGING_LEVELS == 3
static unsigned long shadow_l3_table(
- struct domain *d, unsigned long gpfn, unsigned long gmfn);
+ struct vcpu *v, unsigned long gpfn, unsigned long gmfn);
#endif
#if CONFIG_PAGING_LEVELS == 4
static unsigned long shadow_l4_table(
- struct domain *d, unsigned long gpfn, unsigned long gmfn);
+ struct vcpu *v, unsigned long gpfn, unsigned long gmfn);
#endif
#if CONFIG_PAGING_LEVELS >= 3
unsigned long va, unsigned int from, unsigned int to);
static inline void validate_bl2e_change( struct domain *d,
guest_root_pgentry_t *new_gle_p, pgentry_64_t *shadow_l3, int index);
+static void update_top_level_shadow(struct vcpu *v, unsigned long smfn);
#endif
/********
u32 psh_type)
{
struct page_info *page;
- unsigned long smfn;
+ unsigned long smfn, real_gpfn;
int pin = 0;
void *l1, *lp;
break;
case PGT_l4_shadow:
- if ( !shadow_promote(d, gpfn, gmfn, psh_type) )
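+ /*
+ * For 32-bit PAE guests the CR3 index value is encoded above
+ * PGT_score_shift; mask it off to recover the real gpfn.
+ */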
+ real_gpfn = gpfn & PGT_mfn_mask;
+ if ( !shadow_promote(d, real_gpfn, gmfn, psh_type) )
goto fail;
perfc_incr(shadow_l4_pages);
d->arch.shadow_page_count++;
* Might be worth investigating...
*/
static unsigned long shadow_l2_table(
- struct domain *d, unsigned long gpfn, unsigned long gmfn)
+ struct vcpu *v, unsigned long gpfn, unsigned long gmfn)
{
unsigned long smfn;
l2_pgentry_t *spl2e;
+ struct domain *d = v->domain;
int i;
SH_VVLOG("shadow_l2_table(gpfn=%lx, gmfn=%lx)", gpfn, gmfn);
__rw_entry(v, va, &sle, SHADOW_ENTRY | GET_ENTRY | i);
}
}
- if ( i < PAGING_L4 )
- shadow_update_min_max(entry_get_pfn(sle_up), table_offset_64(va, i));
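+ /*
+ * Only track min/max below the guest's root table level
+ * (PAGING_L3 for PAE guests, PAGING_L4 otherwise).
+ */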
+ if ( d->arch.ops->guest_paging_levels == PAGING_L3 )
+ {
+ if ( i < PAGING_L3 )
+ shadow_update_min_max(entry_get_pfn(sle_up), table_offset_64(va, i));
+ }
+ else
+ {
+ if ( i < PAGING_L4 )
+ shadow_update_min_max(entry_get_pfn(sle_up), table_offset_64(va, i));
+ }
+
sle_up = sle;
}
unsigned long gmfn;
unsigned long gpfn;
int i;
+ unsigned int base_idx = get_cr3_idxval(v);
gmfn = l2mfn;
gpfn = l2pfn;
if ( page_out_of_sync(mfn_to_page(gmfn)) &&
!snapshot_entry_matches(
- d, guest_pt, gpfn, table_offset_64(va, i)) )
+ d, guest_pt, gpfn, guest_table_offset_64(va, i, base_idx)) )
{
unmap_and_return (1);
}
return 0;
}
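+/*
+ * Resync the out-of-sync PAE guest top-level (PDPT) entries for the
+ * current vcpu's CR3 index, propagating changes into the shadow L3 and
+ * unpinning the shadow if the guest table no longer looks live.
+ */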
+static void resync_pae_guest_l3(struct domain *d)
+{
+ struct out_of_sync_entry *entry;
+ unsigned long i, idx;
+ unsigned long smfn, gmfn;
+ pgentry_64_t *guest, *shadow_l3, *snapshot;
+ struct vcpu *v = current;
+ int max = -1;
+ int unshadow = 0;
+
+ ASSERT( shadow_mode_external(d) );
+
+ gmfn = pagetable_get_pfn(v->arch.guest_table);
+
+ for ( entry = d->arch.out_of_sync; entry; entry = entry->next )
+ {
+ if ( entry->snapshot_mfn == SHADOW_SNAPSHOT_ELSEWHERE )
+ continue;
+ if ( entry->gmfn != gmfn )
+ continue;
+
+ idx = get_cr3_idxval(v);
+ smfn = __shadow_status(
+ d, ((unsigned long)(idx << PGT_score_shift) | entry->gpfn), PGT_l4_shadow);
+
+ ASSERT(smfn);
+
+ guest = (pgentry_64_t *)map_domain_page(entry->gmfn);
+ snapshot = (pgentry_64_t *)map_domain_page(entry->snapshot_mfn);
+ shadow_l3 = (pgentry_64_t *)map_domain_page(smfn);
+
+ for ( i = 0; i < PAE_L3_PAGETABLE_ENTRIES; i++ )
+ {
+ int index = i + idx * PAE_L3_PAGETABLE_ENTRIES;
+ if ( entry_has_changed(
+ guest[index], snapshot[index], PAGE_FLAG_MASK) )
+ {
+ validate_entry_change(d, &guest[index],
+ &shadow_l3[i], PAGING_L3);
+ }
+ if ( entry_get_value(guest[index]) != 0 )
+ max = i;
+
+ if ( !(entry_get_flags(guest[index]) & _PAGE_PRESENT) &&
+ unlikely(entry_get_value(guest[index]) != 0) &&
+ !unshadow &&
+ (frame_table[smfn].u.inuse.type_info & PGT_pinned) )
+ unshadow = 1;
+
+ }
+ if ( max == -1 )
+ unshadow = 1;
+
+ unmap_domain_page(guest);
+ unmap_domain_page(snapshot);
+ unmap_domain_page(shadow_l3);
+
+ if ( unlikely(unshadow) )
+ shadow_unpin(smfn);
+ break;
+ }
+}
static int resync_all(struct domain *d, u32 stype)
{
return need_flush;
}
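+/*
+ * Resync every shadow page type relevant to the current guest paging mode.
+ */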
+#if CONFIG_PAGING_LEVELS == 2
+static int resync_all_levels_guest_page(struct domain *d)
+{
+ int need_flush = 0;
+
+ need_flush |= resync_all(d, PGT_l1_shadow);
+ if ( d->arch.ops->guest_paging_levels == PAGING_L2 &&
+ shadow_mode_translate(d) )
+ {
+ need_flush |= resync_all(d, PGT_hl2_shadow);
+ }
+ return need_flush;
+}
+#elif CONFIG_PAGING_LEVELS == 3
+static int resync_all_levels_guest_page(struct domain *d)
+{
+ int need_flush = 0;
+
+ need_flush |= resync_all(d, PGT_l1_shadow);
+ if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
+ need_flush |= resync_all(d, PGT_l4_shadow);
+ else
+ {
+ need_flush |= resync_all(d, PGT_l2_shadow);
+ if ( shadow_mode_log_dirty(d) )
+ {
+ need_flush |= resync_all(d, PGT_l3_shadow);
+ need_flush |= resync_all(d, PGT_l4_shadow);
+ }
+ else
+ resync_pae_guest_l3(d);
+ }
+
+ return need_flush;
+}
+#elif CONFIG_PAGING_LEVELS == 4
+static int resync_all_levels_guest_page(struct domain *d)
+{
+ int need_flush = 0;
+
+ need_flush |= resync_all(d, PGT_l1_shadow);
+ if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
+ need_flush |= resync_all(d, PGT_l4_shadow);
+ else
+ {
+ need_flush |= resync_all(d, PGT_l2_shadow);
+ if ( d->arch.ops->guest_paging_levels == PAGING_L3 )
+ resync_pae_guest_l3(d);
+ else
+ {
+ need_flush |= resync_all(d, PGT_l3_shadow);
+ need_flush |= resync_all(d, PGT_l4_shadow);
+ }
+ }
+ return need_flush;
+}
+#endif
+
static void sync_all(struct domain *d)
{
struct out_of_sync_entry *entry;
/* Flush ourself later. */
need_flush = 1;
- /* Second, resync all L1 pages, then L2 pages, etc... */
- need_flush |= resync_all(d, PGT_l1_shadow);
-
-#if CONFIG_PAGING_LEVELS == 2
- if ( d->arch.ops->guest_paging_levels == PAGING_L2 &&
- shadow_mode_translate(d) )
- {
- need_flush |= resync_all(d, PGT_hl2_shadow);
- }
-#endif
-
-#if CONFIG_PAGING_LEVELS >= 3
- if ( d->arch.ops->guest_paging_levels == PAGING_L2 )
- need_flush |= resync_all(d, PGT_l4_shadow);
- else
- need_flush |= resync_all(d, PGT_l2_shadow);
-
- if ( d->arch.ops->guest_paging_levels >= PAGING_L3 )
- {
- need_flush |= resync_all(d, PGT_l3_shadow);
- need_flush |= resync_all(d, PGT_l4_shadow);
- }
-#endif
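+ /* Second, resync all shadow pages appropriate to the guest's paging mode. */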
+ need_flush |= resync_all_levels_guest_page(d);
if ( need_flush && !unlikely(shadow_mode_external(d)) )
local_flush_tlb();
v->arch.guest_vtable = map_domain_page_global(gmfn);
}
+#if CONFIG_PAGING_LEVELS >= 3
+ /*
+ * For a 32-bit PAE guest, fold the CR3 index value into the gpfn used as
+ * the shadow-status key, so that each PDPT slot gets its own shadow.
+ */
+ if ( SH_GUEST_32PAE && d->arch.ops->guest_paging_levels == PAGING_L3 )
+ {
+ u32 index = get_cr3_idxval(v);
+ gpfn = (index << PGT_score_shift) | gpfn;
+ }
+#endif
+
/*
* arch.shadow_table
*/
if ( unlikely(!(smfn = __shadow_status(d, gpfn, PGT_base_page_table))) )
{
#if CONFIG_PAGING_LEVELS == 2
- smfn = shadow_l2_table(d, gpfn, gmfn);
+ smfn = shadow_l2_table(v, gpfn, gmfn);
#elif CONFIG_PAGING_LEVELS == 3
- smfn = shadow_l3_table(d, gpfn, gmfn);
+ smfn = shadow_l3_table(v, gpfn, gmfn);
#elif CONFIG_PAGING_LEVELS == 4
- smfn = shadow_l4_table(d, gpfn, gmfn);
+ smfn = shadow_l4_table(v, gpfn, gmfn);
#endif
}
else
{
+#if CONFIG_PAGING_LEVELS >= 3
+ if ( SH_GUEST_32PAE && d->arch.ops->guest_paging_levels == PAGING_L3 )
+ update_top_level_shadow(v, smfn);
+#endif
/*
* move sync later in order to avoid this smfn being
* unshadowed occasionally
#if CONFIG_PAGING_LEVELS == 3
static unsigned long shadow_l3_table(
- struct domain *d, unsigned long gpfn, unsigned long gmfn)
+ struct vcpu *v, unsigned long gpfn, unsigned long gmfn)
{
unsigned long smfn;
l3_pgentry_t *spl3e;
+ struct domain *d = v->domain;
perfc_incrc(shadow_l3_table_count);
- SH_VVLOG("shadow_l4_table(gpfn=%lx, gmfn=%lx)", gpfn, gmfn);
+ SH_VVLOG("shadow_l3_table(gpfn=%lx, gmfn=%lx)", gpfn, gmfn);
if ( SH_L1_HAS_NEXT_PAGE &&
d->arch.ops->guest_paging_levels == PAGING_L2 )
}
#endif /* CONFIG_PAGING_LEVELS == 3 */
-#ifndef GUEST_PGENTRY_32
+#if (!defined(GUEST_PGENTRY_32) && !defined(GUEST_32PAE))
static unsigned long gva_to_gpa_pae(unsigned long gva)
{
BUG();
#if CONFIG_PAGING_LEVELS == 4
static unsigned long shadow_l4_table(
- struct domain *d, unsigned long gpfn, unsigned long gmfn)
+ struct vcpu *v, unsigned long gpfn, unsigned long gmfn)
{
unsigned long smfn;
l4_pgentry_t *spl4e;
+ struct domain *d = v->domain;
SH_VVLOG("shadow_l4_table(gpfn=%lx, gmfn=%lx)", gpfn, gmfn);
spl4e = (l4_pgentry_t *)map_domain_page(smfn);
+ /* For a 32-bit PAE guest on a 64-bit host. */
+ if ( SH_GUEST_32PAE && d->arch.ops->guest_paging_levels == PAGING_L3 )
+ {
+ unsigned long index;
+ /*
+ * The shadow L4's page_info->tlbflush_timestamp also records
+ * its own CR3 index.
+ */
+ index = get_cr3_idxval(v);
+ frame_table[smfn].tlbflush_timestamp = index;
+
+ memset(spl4e, 0, L4_PAGETABLE_ENTRIES*sizeof(l4_pgentry_t));
+ /* Map the self entry */
+ spl4e[PAE_SHADOW_SELF_ENTRY] = l4e_from_pfn(smfn, __PAGE_HYPERVISOR);
+ unmap_domain_page(spl4e);
+ return smfn;
+ }
+
/* Install hypervisor and 4x linear p.t. mappings. */
if ( (PGT_base_page_table == PGT_l4_page_table) &&
!shadow_mode_external(d) )
#endif /* CONFIG_PAGING_LEVELS == 4 */
#if CONFIG_PAGING_LEVELS >= 3
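+/*
+ * Refresh the four shadow PDPT entries from the slot of the guest PDPT
+ * selected by the vcpu's current CR3 index value.
+ */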
+static void
+update_top_level_shadow(struct vcpu *v, unsigned long smfn)
+{
+ unsigned long index = get_cr3_idxval(v);
+ pgentry_64_t *sple = (pgentry_64_t *)map_domain_page(smfn);
+ pgentry_64_t *gple = (pgentry_64_t *)v->arch.guest_vtable;
+ int i;
+
+ for ( i = 0; i < PAE_L3_PAGETABLE_ENTRIES; i++ )
+ validate_entry_change(
+ v->domain, &gple[index*4+i], &sple[i], PAGING_L3);
+
+ unmap_domain_page(sple);
+}
+
/*
* validate_bl2e_change()
* The code is for 32-bit HVM guest on 64-bit host.
pgentry_64_t gle = { 0 };
unsigned long gpfn = 0, mfn;
int i;
+ unsigned int base_idx = get_cr3_idxval(v);
ASSERT( d->arch.ops->guest_paging_levels >= PAGING_L3 );
#if CONFIG_PAGING_LEVELS >= 3
if ( d->arch.ops->guest_paging_levels == PAGING_L3 )
{
- gpfn = pagetable_get_pfn(v->arch.guest_table);
+ if ( SH_GUEST_32PAE )
+ gpfn = hvm_get_guest_ctrl_reg(v, 3) >> PAGE_SHIFT;
+ else
+ gpfn = pagetable_get_pfn(v->arch.guest_table);
}
#endif
mfn = gmfn_to_mfn(d, gpfn);
lva = (pgentry_64_t *) map_domain_page(mfn);
- gle = lva[table_offset_64(va, i)];
+ gle = lva[guest_table_offset_64(va, i, base_idx)];
+
unmap_domain_page(lva);
gpfn = entry_get_pfn(gle);
* The naming convention of the shadow_ops:
* MODE_<pgentry size>_<guest paging levels>_HANDLER
*/
-#ifndef GUEST_PGENTRY_32
+#if (!defined(GUEST_PGENTRY_32) && !defined(GUEST_32PAE))
struct shadow_ops MODE_64_3_HANDLER = {
.guest_paging_levels = 3,
.invlpg = shadow_invlpg_64,
#endif
#if ( CONFIG_PAGING_LEVELS == 3 && !defined (GUEST_PGENTRY_32) ) || \
- ( CONFIG_PAGING_LEVELS == 4 && defined (GUEST_PGENTRY_32) )
+ ( CONFIG_PAGING_LEVELS == 4 && defined (GUEST_PGENTRY_32) )
/*
#define _XEN_SHADOW_64_H
#include <asm/shadow.h>
#include <asm/shadow_ops.h>
+#include <asm/hvm/hvm.h>
/*
* The naming convention of the shadow_ops:
extern struct shadow_ops MODE_64_3_HANDLER;
#if CONFIG_PAGING_LEVELS == 4
extern struct shadow_ops MODE_64_4_HANDLER;
+extern struct shadow_ops MODE_64_PAE_HANDLER;
#endif
#if CONFIG_PAGING_LEVELS == 3
#define PAE_SHADOW_SELF_ENTRY 259
#define PAE_L3_PAGETABLE_ENTRIES 4
+/******************************************************************************/
+/*
+ * The macros and inlines below support a 32-bit PAE guest on a 64-bit host.
+ *
+ * A PAE guest's PDPT is 32-byte aligned, so a single page can hold up to 128
+ * of them; bits 5-11 of the guest CR3 identify which slot is in use.
+ */
+#define PAE_CR3_ALIGN 5
+#define PAE_CR3_IDX_MASK 0x7f
+#define PAE_CR3_IDX_NO 128
+
+/******************************************************************************/
static inline int table_offset_64(unsigned long va, int level)
{
switch(level) {
#if CONFIG_PAGING_LEVELS >= 4
#ifndef GUEST_PGENTRY_32
+#ifndef GUEST_32PAE
case 4:
return (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
+#else
+ case 4:
+ return PAE_SHADOW_SELF_ENTRY;
+#endif
#else
case 4:
return PAE_SHADOW_SELF_ENTRY;
}
}
+/*****************************************************************************/
+
+#if defined( GUEST_32PAE )
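+/*
+ * Like table_offset_64(), but the L3 (PDPT) offset also selects the guest
+ * PDPT's 32-byte slot within its page.
+ */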
+static inline int guest_table_offset_64(unsigned long va, int level, unsigned int index)
+{
+ switch(level) {
+ case 1:
+ return (((va) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1));
+ case 2:
+ return (((va) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1));
+ case 3:
+ return (index * 4 + ((va) >> L3_PAGETABLE_SHIFT));
+#if CONFIG_PAGING_LEVELS == 3
+ case 4:
+ return PAE_SHADOW_SELF_ENTRY;
+#endif
+
+#if CONFIG_PAGING_LEVELS >= 4
+#ifndef GUEST_PGENTRY_32
+ case 4:
+ return (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
+#else
+ case 4:
+ return PAE_SHADOW_SELF_ENTRY;
+#endif
+#endif
+ default:
+ return -1;
+ }
+}
+
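+/* Extract the PDPT slot index (CR3 bits 5-11) from the guest's PAE CR3. */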
+static inline unsigned long get_cr3_idxval(struct vcpu *v)
+{
+ unsigned long pae_cr3 = hvm_get_guest_ctrl_reg(v, 3); /* get CR3 */
+
+ return (pae_cr3 >> PAE_CR3_ALIGN) & PAE_CR3_IDX_MASK;
+}
+
+#define SH_GUEST_32PAE 1
+#else
+#define guest_table_offset_64(va, level, index) \
+ table_offset_64((va),(level))
+#define get_cr3_idxval(v) 0
+#define SH_GUEST_32PAE 0
+#endif
+
+/********************************************************************************/
+
static inline void free_out_of_sync_state(struct domain *d)
{
struct out_of_sync_entry *entry;
u32 level = flag & L_MASK;
struct domain *d = v->domain;
int root_level;
+ unsigned int base_idx;
+
+ base_idx = get_cr3_idxval(v);
if ( flag & SHADOW_ENTRY )
{
else if ( flag & GUEST_ENTRY )
{
root_level = v->domain->arch.ops->guest_paging_levels;
- index = table_offset_64(va, root_level);
+ index = guest_table_offset_64(va, root_level, base_idx);
le_e = (pgentry_64_t *)&v->arch.guest_vtable[index];
}
else /* direct mode */
if ( le_p )
unmap_domain_page(le_p);
le_p = (pgentry_64_t *)map_domain_page(mfn);
- index = table_offset_64(va, (level + i - 1));
+ if ( flag & SHADOW_ENTRY )
+ index = table_offset_64(va, (level + i - 1));
+ else
+ index = guest_table_offset_64(va, (level + i - 1), base_idx);
le_e = &le_p[index];
}